Solution Code

An example of my code for this exercise is below.


In [1]:
import numpy as np

# Define a softmax function.
# axis=0 normalizes down the rows, so for a matrix input
# each *column* becomes a probability distribution.
def softmax(x):
    p = np.exp(x) / np.sum(np.exp(x), axis=0)
    return p
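
# A numerically stable variant (an optional sketch, not part of the
# exercise): subtracting the column max before exponentiating avoids
# overflow for large inputs and leaves the result mathematically unchanged.
def softmaxStable(x):
    z = x - np.max(x, axis=0)
    return np.exp(z) / np.sum(np.exp(z), axis=0)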

# Define a linear classifier: softmax(W x + b)
def linearClassifier(x, W, b):
    # np.dot() computes the matrix product of the weight
    # matrix W (one row per class) and the feature vector x;
    # b is a per-class bias vector
    return softmax(np.dot(W, x) + b)

# Test softmax() out on a vector
test = [1, 3, 2]
print(softmax(test))

# Again on a matrix
test = np.ones((3, 4))
test[0, :] = 2.
print(test)
print(softmax(test))
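
# Sanity check: with axis=0 each column of the matrix output sums to 1;
# in the output below, 0.57611688 + 0.21194156 + 0.21194156 = 1.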

# Test linearClassifier out on an array
W = np.ones((3, 4))
W[0, :] = 2.
x = np.asarray([1, 0, 1, 0])
b = np.asarray([1, 1, 2])
print(x)
print(W)
print(b)
print(linearClassifier(x, W, b))

# Define a class
# ---------------------------
# Note: this class deliberately reuses the name of the function above,
# so from here on linearClassifier refers to the class.
class linearClassifier:
    def __init__(self, W, b):
        self.W = W
        self.b = b

    def predict(self, x):
        W = self.W
        b = self.b
        return softmax(np.dot(W, x) + b)

myLC = linearClassifier(W, b)
print(myLC.predict(x))


[ 0.09003057  0.66524096  0.24472847]
[[ 2.  2.  2.  2.]
 [ 1.  1.  1.  1.]
 [ 1.  1.  1.  1.]]
[[ 0.57611688  0.57611688  0.57611688  0.57611688]
 [ 0.21194156  0.21194156  0.21194156  0.21194156]
 [ 0.21194156  0.21194156  0.21194156  0.21194156]]
[1 0 1 0]
[[ 2.  2.  2.  2.]
 [ 1.  1.  1.  1.]
 [ 1.  1.  1.  1.]]
[1 1 2]
[ 0.66524096  0.09003057  0.24472847]
[ 0.66524096  0.09003057  0.24472847]
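
As an extra step beyond the exercise, the probability vector can be turned into a predicted class label with np.argmax. A minimal sketch using the objects defined above:

# Pick the index of the largest probability. From the output above,
# class 0 has probability ~0.665, so this would print 0.
print(np.argmax(myLC.predict(x)))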